In [1]:
    
from scipy import optimize, special
import numpy as np
    
In [2]:
    
def func(mu, theta):
    """Root function for the implicit equation mu + mu*exp(theta*mu) = 1.

    The equation is rewritten in log space as
        log(mu) + log(1 + exp(theta*mu)) = 0,
    and ``np.logaddexp(0, theta*mu)`` evaluates the second term without
    overflow for large theta*mu.
    """
    return np.log(mu) + np.logaddexp(0, theta * mu)
def jac(mu, theta):
    """Derivative of ``func`` with respect to mu.

    d/dmu [log(mu) + log(1 + exp(theta*mu))]
        = theta * sigmoid(theta*mu) + 1/mu
    """
    # Local renamed so it no longer shadows the function's own name.
    scaled = theta * mu
    return theta * special.expit(scaled) + 1 / mu
def mu_from_theta(theta):
    """Solve mu + mu*exp(theta*mu) = 1 for mu at the given theta.

    Uses Newton's method with the analytic derivative ``jac``,
    starting from mu = 1.

    Parameters
    ----------
    theta : float
        Parameter of the implicit equation.

    Returns
    -------
    float
        The root mu(theta) found by ``scipy.optimize.newton``.
    """
    # Bug fix: the solve previously hard-coded args=(0.4,), so the theta
    # argument was silently ignored and every call returned mu(0.4).
    # Pass the actual theta through to func/jac.
    return optimize.newton(func, 1, fprime=jac, args=(theta,))
    
In [5]:
    
import sympy
# Symbolic setup: mu is treated as an (unknown) function of theta.
mu = sympy.Function('mu')
theta = sympy.Symbol('theta')
# Residual of the implicit equation: mu + mu*exp(theta*mu) - 1 = 0.
R = mu(theta) + mu(theta) * sympy.exp(theta * mu(theta)) - 1
# Differentiate the residual w.r.t. theta and solve for mu'(theta);
# this is the implicit-function-theorem gradient used by the Op below.
solution = sympy.solve(R.diff(theta), mu(theta).diff(theta))[0]
# Bare last expression: rich display of the symbolic derivative.
solution
    
    Out[5]:
In [9]:
    
import theano
import theano.tensor as tt
import theano.tests.unittest_tools
class MuFromTheta(tt.Op):
    """Theano Op exposing ``mu_from_theta`` to the graph, with a gradient.

    The forward pass runs the scipy Newton solve numerically; the
    gradient comes from implicit differentiation of
    mu + mu*exp(theta*mu) = 1.
    """

    # Scalar float64 in, scalar float64 out.
    itypes = [tt.dscalar]
    otypes = [tt.dscalar]

    def perform(self, node, inputs, outputs):
        # Numeric evaluation: unpack the single input, solve, store root.
        (theta,) = inputs
        outputs[0][0] = np.array(mu_from_theta(theta))

    def grad(self, inputs, g):
        # Symbolic gradient:
        #   dmu/dtheta = -mu**2 / (1 + theta*mu + exp(-theta*mu))
        (theta,) = inputs
        mu = self(theta)
        tm = theta * mu
        dmu_dtheta = -(mu ** 2) / (1 + tm + tt.exp(-tm))
        return [g[0] * dmu_dtheta]
    
In [10]:
    
# NOTE(review): mid-notebook import; conventionally this belongs in the
# top import cell (kept in place to leave cell contents unchanged).
import pymc3 as pm
# Single reusable instance of the custom Op defined above.
tt_mu_from_theta = MuFromTheta()
with pm.Model() as model:
    # Positive-only prior on theta.
    theta = pm.HalfNormal('theta', sigma=1)
    # mu is a deterministic transform of theta through the black-box
    # solver; wrapping it in Deterministic records it in the trace.
    mu = pm.Deterministic('mu', tt_mu_from_theta(theta))
    # Gaussian likelihood around the solved mu for the three observations.
    pm.Normal('y', mu=mu, sigma=0.1, observed=[0.2, 0.21, 0.3])
    # Sample from the posterior.
    trace = pm.sample(1000)
    
    
In [11]:
    
# Marginal densities and per-draw traces for theta and mu.
pm.traceplot(trace)
    
    
    Out[11]:
    
In [12]:
    
# Posterior density plots for the sampled variables.
pm.plot_posterior(trace)
    
    Out[12]:
    
In [13]:
    
# Tabular summary statistics for the posterior samples.
pm.summary(trace)
    
    Out[13]:
In [ ]: